From b90262d6a4c43d9f7d34dd06da6d103083d4713d Mon Sep 17 00:00:00 2001
From: "iap10@freefall.cl.cam.ac.uk" <iap10@freefall.cl.cam.ac.uk>
Date: Mon, 14 Feb 2005 02:27:01 +0000
Subject: [PATCH] bitkeeper revision 1.1199 (42100c75YrRV-rqA2PA8zYLZf22hrw)

Further shadow_mode cleanups in preparation for the new implementation
of translate mode.

Signed-off-by: ian@xensource.com
---
 xen/arch/x86/domain.c              |  15 ++--
 xen/arch/x86/mm.c                  |  32 +++++---
 xen/arch/x86/shadow.c              |  29 +++----
 xen/arch/x86/x86_32/domain_build.c |   2 +-
 xen/include/asm-x86/shadow.h       | 128 ++++++++++++++---------------
 xen/include/public/dom0_ops.h      |   2 +-
 6 files changed, 106 insertions(+), 102 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index d01c8765e3..dd496ee31a 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -404,10 +404,15 @@ static int vmx_final_setup_guest(struct exec_domain *ed,
 
         /* Put the domain in shadow mode even though we're going to be using
          * the shared 1:1 page table initially. It shouldn't hurt */
-        shadow_mode_enable(ed->domain, SHM_full_32);
+        shadow_mode_enable(ed->domain, SHM_enable|SHM_translate|SHM_external);
     }
 
-    update_pagetables(ed); /* this assigns shadow_pagetable */
+    /* We don't call update_pagetables() as we actively want fields such as
+     * the linear_pg_table to be null so that we bail out early from
+     * shadow_fault in case the vmx guest tries illegal accesses with
+     * paging turned off.
+     */
+    //update_pagetables(ed); /* this assigns shadow_pagetable */
     alloc_monitor_pagetable(ed); /* this assigns monitor_pagetable */
 
     return 0;
@@ -502,11 +507,7 @@ int arch_final_setup_guest(
         return vmx_final_setup_guest(d, c);
 #endif
 
-    /* We don't call update_pagetables() as we actively want fields such as
-     * the linear_pg_table to be null so that we bail out early of
-     * shadow_fault in case the vmx guest tries illegal accesses with
-     * paging turned of.
-     */
+    update_pagetables(d);
 
     return 0;
 }
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 47284759a7..b0f4951d6c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1052,7 +1052,7 @@ void free_page_type(struct pfn_info *page, unsigned int type)
         BUG();
     }
 
-    if ( unlikely(shadow_mode(d)) &&
+    if ( unlikely(shadow_mode_enabled(d)) &&
          (get_shadow_status(d, page_to_pfn(page)) & PSH_shadowed) )
     {
         unshadow_table(page_to_pfn(page), type);
@@ -1653,9 +1653,12 @@ int do_mmu_update(
 
     cleanup_writable_pagetable(d);
 
-    if ( unlikely(shadow_mode(d)) )
+    if ( unlikely(shadow_mode_enabled(d)) )
         check_pagetable(d, ed->arch.guest_table, "pre-mmu"); /* debug */
 
+    if ( unlikely(shadow_mode_translate(d)) )
+        domain_crash();
+
     /*
      * If we are resuming after preemption, read how much work we have already
      * done. This allows us to set the @done output parameter correctly.
      */
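Two things recur from here on. First, translate-mode guests no longer see machine addresses, so the pre-existing PV interfaces that take them (do_mmu_update above, do_update_va_mapping below) are fenced off with domain_crash() until they learn to translate. Second, every equality test of the form shadow_mode(d) == SHM_xxx becomes a predicate macro, because the mode word is now a bitmask of orthogonal properties (see the shadow.h hunk further down). A minimal standalone sketch of why the equality tests had to go, using the new flag values:

    #include <assert.h>

    #define SHM_enable    (1 << 0)
    #define SHM_log_dirty (1 << 1)
    #define SHM_translate (1 << 2)
    #define SHM_external  (1 << 3)

    int main(void)
    {
        /* A VMX domain now carries several properties at once. */
        unsigned int mode = SHM_enable | SHM_translate | SHM_external;

        assert(mode != SHM_translate); /* old-style '==' test misses it */
        assert(mode & SHM_translate);  /* new-style bit test finds it   */
        return 0;
    }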
@@ -1750,7 +1753,7 @@ int do_mmu_update(
                 okay = mod_l1_entry((l1_pgentry_t *)va,
                                     mk_l1_pgentry(req.val));
 
-                if ( unlikely(shadow_mode(d)) && okay &&
+                if ( unlikely(shadow_mode_enabled(d)) && okay &&
                      (get_shadow_status(d, page-frame_table) &
                       PSH_shadowed) )
                 {
@@ -1769,7 +1772,7 @@ int do_mmu_update(
                                         mk_l2_pgentry(req.val),
                                         pfn);
 
-                if ( unlikely(shadow_mode(d)) && okay &&
+                if ( unlikely(shadow_mode_enabled(d)) && okay &&
                      (get_shadow_status(d, page-frame_table) &
                       PSH_shadowed) )
                 {
@@ -1788,7 +1791,7 @@ int do_mmu_update(
                                         mk_l3_pgentry(req.val),
                                         pfn);
 
-                if ( unlikely(shadow_mode(d)) && okay &&
+                if ( unlikely(shadow_mode_enabled(d)) && okay &&
                      (get_shadow_status(d, page-frame_table) &
                       PSH_shadowed) )
                 {
@@ -1806,7 +1809,7 @@ int do_mmu_update(
                                         mk_l4_pgentry(req.val),
                                         pfn);
 
-                if ( unlikely(shadow_mode(d)) && okay &&
+                if ( unlikely(shadow_mode_enabled(d)) && okay &&
                      (get_shadow_status(d, page-frame_table) &
                       PSH_shadowed) )
                 {
@@ -1845,7 +1848,7 @@ int do_mmu_update(
              * If in log-dirty mode, mark the corresponding pseudo-physical
              * page as dirty.
              */
-            if ( unlikely(shadow_mode(d) == SHM_logdirty) &&
+            if ( unlikely(shadow_mode_log_dirty(d)) &&
                  mark_dirty(d, pfn) )
                 d->arch.shadow_dirty_block_count++;
 
@@ -1901,7 +1904,7 @@ int do_mmu_update(
     if ( unlikely(pdone != NULL) )
         __put_user(done + i, pdone);
 
-    if ( unlikely(shadow_mode(d)) )
+    if ( unlikely(shadow_mode_enabled(d)) )
         check_pagetable(d, ed->arch.guest_table, "post-mmu"); /* debug */
 
     UNLOCK_BIGLOCK(d);
@@ -1924,6 +1927,9 @@ int do_update_va_mapping(unsigned long va,
     if ( unlikely(!__addr_ok(va)) )
         return -EINVAL;
 
+    if ( unlikely(shadow_mode_translate(d)) )
+        domain_crash();
+
     LOCK_BIGLOCK(d);
 
     cleanup_writable_pagetable(d);
@@ -1937,7 +1943,7 @@ int do_update_va_mapping(unsigned long va,
                                 mk_l1_pgentry(val))) )
         err = -EINVAL;
 
-    if ( unlikely(shadow_mode(d)) )
+    if ( unlikely(shadow_mode_enabled(d)) )
     {
         unsigned long sval = 0;
 
@@ -1974,7 +1980,7 @@ int do_update_va_mapping(unsigned long va,
          * the PTE in the PT-holding page. We need the machine frame number
          * for this.
          */
-        if ( shadow_mode(d) == SHM_logdirty )
+        if ( shadow_mode_log_dirty(d) )
             mark_dirty(d, va_to_l1mfn(va));
 
         check_pagetable(d, ed->arch.guest_table, "va"); /* debug */
@@ -2247,7 +2253,7 @@ void ptwr_flush(const int which)
                 PTWR_PRINT_WHICH, ptep, pte);
     pte &= ~_PAGE_RW;
 
-    if ( unlikely(shadow_mode(d)) )
+    if ( unlikely(shadow_mode_enabled(d)) )
     {
         /* Write-protect the p.t. page in the shadow page table. */
         l1pte_propagate_from_guest(d, &pte, &spte);
@@ -2339,7 +2345,7 @@ void ptwr_flush(const int which)
      * STEP 3. Reattach the L1 p.t. page into the current address space.
      */
 
-    if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode(d)) )
+    if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode_enabled(d)) )
     {
         pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
         *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
@@ -2448,7 +2454,7 @@ int ptwr_do_page_fault(unsigned long addr)
 
     /* For safety, disconnect the L1 p.t. page from current space. */
     if ( (which == PTWR_PT_ACTIVE) &&
-         likely(!shadow_mode(current->domain)) )
+         likely(!shadow_mode_enabled(current->domain)) )
     {
         *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
 #if 1
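The log-dirty hunks above only consult and mark the bitmap; its allocation moves with __shadow_mode_enable() in the next file. As a rough standalone sketch of what a mark_dirty-style primitive amounts to (hypothetical simplified signature, not the Xen implementation; returning 1 only for a fresh mark is inferred from how the caller bumps shadow_dirty_block_count):

    #include <limits.h>

    /* One bit per pseudo-physical page frame. Returns 1 if the bit was
     * newly set, 0 if it was already set or pfn is out of range. */
    static int mark_dirty_sketch(unsigned long *bitmap,
                                 unsigned long size_in_bits,
                                 unsigned long pfn)
    {
        const unsigned long bits = sizeof(unsigned long) * CHAR_BIT;
        unsigned long mask = 1UL << (pfn % bits);

        if ( pfn >= size_in_bits )
            return 0;
        if ( bitmap[pfn / bits] & mask )
            return 0;               /* already dirty */
        bitmap[pfn / bits] |= mask; /* newly dirtied */
        return 1;
    }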
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index f9aed4315b..d676317843 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -57,6 +57,8 @@ void free_shadow_state(struct domain *d)
 
     shadow_audit(d, 1);
 
+    if ( !d->arch.shadow_ht ) return;
+
     /* Free each hash chain in turn. */
     for ( i = 0; i < shadow_ht_buckets; i++ )
     {
@@ -114,7 +116,7 @@ static inline int clear_shadow_page(
     /* We clear L2 pages by zeroing the guest entries. */
     case PGT_l2_page_table:
         p = map_domain_mem((spage - frame_table) << PAGE_SHIFT);
-        if ( shadow_mode(d) == SHM_full_32 )
+        if ( shadow_mode_external(d) )
            memset(p, 0, L2_PAGETABLE_ENTRIES * sizeof(*p));
        else
            memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
@@ -169,6 +171,8 @@ void shadow_mode_init(void)
 
 int __shadow_mode_enable(struct domain *d, unsigned int mode)
 {
+    d->arch.shadow_mode = mode;
+
     if (!d->arch.shadow_ht)
     {
         d->arch.shadow_ht = xmalloc_array(struct shadow_status,
                                           shadow_ht_buckets);
@@ -179,7 +183,7 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
                shadow_ht_buckets * sizeof(struct shadow_status));
     }
 
-    if ( mode == SHM_logdirty && !d->arch.shadow_dirty_bitmap)
+    if ( shadow_mode_log_dirty(d) && !d->arch.shadow_dirty_bitmap)
     {
         d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
         d->arch.shadow_dirty_bitmap =
@@ -194,8 +198,6 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
                d->arch.shadow_dirty_bitmap_size/8);
     }
 
-    d->arch.shadow_mode = mode;
-
     return 0;
 
  nomem:
@@ -389,17 +391,17 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
         break;
 
     case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
-        shadow_mode_disable(d);
-        rc = __shadow_mode_enable(d, SHM_test);
+        free_shadow_state(d);
+        rc = __shadow_mode_enable(d, SHM_enable);
         break;
 
     case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
-        shadow_mode_disable(d);
-        rc = __shadow_mode_enable(d, SHM_logdirty);
+        free_shadow_state(d);
+        rc = __shadow_mode_enable(d, d->arch.shadow_mode|SHM_log_dirty);
         break;
 
     default:
-        rc = shadow_mode(d) ? shadow_mode_table_op(d, sc) : -EINVAL;
+        rc = shadow_mode_enabled(d) ? shadow_mode_table_op(d, sc) : -EINVAL;
         break;
     }
 
@@ -488,7 +490,7 @@ unsigned long shadow_l2_table(
 #ifdef __i386__
     /* Install hypervisor and 2x linear p.t. mapings. */
-    if ( shadow_mode(d) == SHM_full_32 )
+    if ( shadow_mode_translate(d) )
     {
 #ifdef CONFIG_VMX
         vmx_update_shadow_state(d->exec_domain[0], gpfn, spfn);
 #endif
@@ -519,11 +521,10 @@ unsigned long shadow_l2_table(
             mk_l2_pgentry(__pa(page_get_owner(
                 &frame_table[gpfn])->arch.mm_perdomain_pt) |
                           __PAGE_HYPERVISOR);
-    }
-#endif
 
-    if ( shadow_mode(d) != SHM_full_32 )
         unmap_domain_mem(spl2e);
+    }
+#endif
 
     SH_VLOG("shadow_l2_table( %p -> %p)", gpfn, spfn);
     return spfn;
@@ -954,7 +955,7 @@ int check_l2_table(
                                       L2_PAGETABLE_SHIFT]),
                 (smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
 
-    if ( shadow_mode(d) != SHM_full_32 ) {
+    if ( !shadow_mode_translate(d) ) {
         if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >>
                                    L2_PAGETABLE_SHIFT]) !=
               ((v2m(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt) |
                 __PAGE_HYPERVISOR))) )
diff --git a/xen/arch/x86/x86_32/domain_build.c b/xen/arch/x86/x86_32/domain_build.c
index 46c4093139..b61c7082aa 100644
--- a/xen/arch/x86/x86_32/domain_build.c
+++ b/xen/arch/x86/x86_32/domain_build.c
@@ -384,7 +384,7 @@ int construct_dom0(struct domain *d,
 #ifndef NDEBUG
     if (0) /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */
     {
-        shadow_mode_enable(d, SHM_test);
+        shadow_mode_enable(d, SHM_enable);
         update_pagetables(ed); /* XXX SMP */
     }
 #endif
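Note the ordering change inside __shadow_mode_enable(): d->arch.shadow_mode is now written at the top, because the bitmap-allocation test has switched from the raw mode argument (mode == SHM_logdirty) to the predicate shadow_mode_log_dirty(d), which reads the field back out of the domain. It also makes the function safe to call again for the LOGDIRTY case above, which ORs SHM_log_dirty into whatever mode is already active. A condensed sketch of the resulting flow, with simplified types and calloc standing in for Xen's allocators:

    #include <errno.h>
    #include <stdlib.h>

    #define SHM_enable    (1u << 0)
    #define SHM_log_dirty (1u << 1)

    struct dom {
        unsigned int   shadow_mode;
        void          *shadow_ht;     /* hash table, allocated once   */
        unsigned long *dirty_bitmap;  /* allocated for log-dirty only */
    };

    static int shadow_mode_enable_sketch(struct dom *d, unsigned int mode)
    {
        d->shadow_mode = mode;  /* first: the tests below read it back */

        if ( !d->shadow_ht && !(d->shadow_ht = calloc(1, 4096)) )
            return -ENOMEM;

        if ( (d->shadow_mode & SHM_log_dirty) && !d->dirty_bitmap &&
             !(d->dirty_bitmap = calloc(1, 4096)) )
            return -ENOMEM;

        return 0;  /* re-enabling with extra bits reuses allocations */
    }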
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 8d9d8a038b..190ffe1669 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -17,17 +17,21 @@
 #define PSH_pfn_mask    ((1<<21)-1)
 
 /* Shadow PT operation mode : shadow-mode variable in arch_domain. */
-#define SHM_test      (1) /* just run domain on shadow PTs */
-#define SHM_logdirty  (2) /* log pages that are dirtied */
-#define SHM_translate (3) /* lookup machine pages in translation table */
-#define SHM_cow       (4) /* copy on write all dirtied pages */
-#define SHM_full_32   (8) /* full virtualization for 32-bit */
+
+#define SHM_enable    (1<<0) /* we're in one of the shadow modes */
+#define SHM_log_dirty (1<<1) /* enable log dirty mode */
+#define SHM_translate (1<<2) /* do p2m translation on guest tables */
+#define SHM_external  (1<<3) /* external page table, not used by Xen */
+
+#define shadow_mode_enabled(_d)   ((_d)->arch.shadow_mode)
+#define shadow_mode_log_dirty(_d) ((_d)->arch.shadow_mode & SHM_log_dirty)
+#define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate)
+#define shadow_mode_external(_d)  ((_d)->arch.shadow_mode & SHM_external)
 
 #define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
 #define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
     (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
 
-#define shadow_mode(_d)      ((_d)->arch.shadow_mode)
 #define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
 #define shadow_lock(_d)      spin_lock(&(_d)->arch.shadow_lock)
 #define shadow_unlock(_d)    spin_unlock(&(_d)->arch.shadow_lock)
@@ -49,19 +53,19 @@ extern void vmx_shadow_invlpg(struct domain *, unsigned long);
 #endif
 
 #define __mfn_to_gpfn(_d, mfn)                  \
-    ( (shadow_mode(_d) == SHM_full_32)          \
+    ( (shadow_mode_translate(_d))               \
       ? machine_to_phys_mapping[(mfn)]          \
       : (mfn) )
 
 #define __gpfn_to_mfn(_d, gpfn)                 \
-    ( (shadow_mode(_d) == SHM_full_32)          \
+    ( (shadow_mode_translate(_d))               \
       ? phys_to_machine_mapping(gpfn)           \
       : (gpfn) )
 
 extern void __shadow_mode_disable(struct domain *d);
 static inline void shadow_mode_disable(struct domain *d)
 {
-    if ( shadow_mode(d) )
+    if ( shadow_mode_enabled(d) )
         __shadow_mode_disable(d);
 }
 
@@ -69,7 +73,7 @@ extern unsigned long shadow_l2_table(
     struct domain *d, unsigned long gpfn);
 static inline void shadow_invalidate(struct exec_domain *ed)
 {
-    if ( shadow_mode(ed->domain) != SHM_full_32 )
+    if ( !shadow_mode_translate(ed->domain) )
         BUG();
     memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
 }
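The new encoding turns the old exclusive mode values into combinations of property bits: SHM_test becomes plain SHM_enable, SHM_logdirty becomes SHM_enable|SHM_log_dirty, and SHM_full_32 becomes SHM_enable|SHM_translate|SHM_external; the old SHM_translate (3) and SHM_cow (4) values had no users left and disappear. A small self-contained check that the predicate macros behave as the rest of the patch assumes (struct dom is a stand-in for struct domain):

    #include <assert.h>

    #define SHM_enable    (1<<0)
    #define SHM_log_dirty (1<<1)
    #define SHM_translate (1<<2)
    #define SHM_external  (1<<3)

    struct dom { struct { unsigned int shadow_mode; } arch; };

    #define shadow_mode_enabled(_d)   ((_d)->arch.shadow_mode)
    #define shadow_mode_log_dirty(_d) ((_d)->arch.shadow_mode & SHM_log_dirty)
    #define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate)
    #define shadow_mode_external(_d)  ((_d)->arch.shadow_mode & SHM_external)

    int main(void)
    {
        struct dom vmx = { { SHM_enable | SHM_translate | SHM_external } };

        assert(shadow_mode_enabled(&vmx));
        assert(shadow_mode_translate(&vmx));
        assert(shadow_mode_external(&vmx));
        assert(!shadow_mode_log_dirty(&vmx));
        return 0;
    }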
@@ -119,33 +123,40 @@ struct shadow_status {
 static inline void __shadow_get_l2e(
     struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
 {
-    if ( shadow_mode(ed->domain) == SHM_full_32 ) {
-        *sl2e = l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]);
-    }
-    else if ( shadow_mode(ed->domain) ) {
-        *sl2e = l2_pgentry_val(shadow_linear_l2_table[l2_table_offset(va)]);
+    if ( likely(shadow_mode_enabled(ed->domain)) ) {
+        if ( shadow_mode_translate(ed->domain) )
+            *sl2e = l2_pgentry_val(
+                ed->arch.shadow_vtable[l2_table_offset(va)]);
+        else
+            *sl2e = l2_pgentry_val(
+                shadow_linear_l2_table[l2_table_offset(va)]);
     }
-    else
+    else {
+        BUG(); /* why do we need this case? */
         *sl2e = l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
+    }
 }
 
 static inline void __shadow_set_l2e(
     struct exec_domain *ed, unsigned long va, unsigned long value)
 {
-    if ( shadow_mode(ed->domain) == SHM_full_32 ) {
-        ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
-    }
-    else if ( shadow_mode(ed->domain) ) {
-        shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
+    if ( likely(shadow_mode_enabled(ed->domain)) ) {
+        if ( shadow_mode_translate(ed->domain) )
+            ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+        else
+            shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
     }
     else
+    {
+        BUG(); /* why do we need this case? */
         linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
+    }
 }
 
 static inline void __guest_get_l2e(
     struct exec_domain *ed, unsigned long va, unsigned long *l2e)
 {
-    *l2e = ( shadow_mode(ed->domain) == SHM_full_32) ?
+    *l2e = ( shadow_mode_translate(ed->domain) ) ?
         l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) :
         l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
 }
@@ -153,7 +164,7 @@ static inline void __guest_get_l2e(
 static inline void __guest_set_l2e(
     struct exec_domain *ed, unsigned long va, unsigned long value)
 {
-    if ( shadow_mode(ed->domain) == SHM_full_32 )
+    if ( shadow_mode_translate(ed->domain) )
     {
         unsigned long pfn;
@@ -237,7 +248,7 @@ static inline void l1pte_write_fault(
     ASSERT(gpte & _PAGE_RW);
     gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
 
-    if ( shadow_mode(d) == SHM_logdirty )
+    if ( shadow_mode_log_dirty(d) )
         __mark_dirty(d, pfn);
 
     spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
@@ -258,7 +269,7 @@ static inline void l1pte_read_fault(
     gpte |= _PAGE_ACCESSED;
     spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
 
-    if ( (shadow_mode(d) == SHM_logdirty) || ! (gpte & _PAGE_DIRTY) )
+    if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
         spte &= ~_PAGE_RW;
 
     SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
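l1pte_write_fault() and l1pte_read_fault() above jointly encode the policy for the writable bit in shadow PTEs: a write fault always sets the guest dirty bit (and the log-dirty bitmap), while the shadow entry stays read-only whenever letting a write through silently would lose information, i.e. in log-dirty mode, or while the guest PTE is still clean. The same rule reappears in l1pte_propagate_from_guest() below. Distilled into a standalone function (PTE flag values as on x86; illustration only):

    #include <assert.h>

    #define _PAGE_RW    0x002UL
    #define _PAGE_DIRTY 0x040UL

    /* May the shadow PTE allow writes? */
    static int shadow_rw_ok(unsigned long gpte, int log_dirty)
    {
        if ( !(gpte & _PAGE_RW) )
            return 0;                      /* guest maps it read-only */
        if ( log_dirty )
            return 0;                      /* must trap writes to log */
        return (gpte & _PAGE_DIRTY) != 0;  /* clean: trap first write */
    }

    int main(void)
    {
        assert(!shadow_rw_ok(_PAGE_RW, 1));               /* log-dirty   */
        assert(!shadow_rw_ok(_PAGE_RW, 0));               /* still clean */
        assert( shadow_rw_ok(_PAGE_RW | _PAGE_DIRTY, 0)); /* dirty       */
        return 0;
    }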
@@ -271,47 +282,28 @@ static inline void l1pte_propagate_from_guest(
 {
     unsigned long gpte = *gpte_p;
     unsigned long spte = *spte_p;
-    unsigned long host_pfn, host_gpte;
+    unsigned long pfn = gpte >> PAGE_SHIFT;
+    unsigned long mfn = __gpfn_to_mfn(d, pfn);
+
 #if SHADOW_VERBOSE_DEBUG
     unsigned long old_spte = spte;
 #endif
 
-    switch ( shadow_mode(d) )
-    {
-    case SHM_test:
-        spte = 0;
-        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
-             (_PAGE_PRESENT|_PAGE_ACCESSED) )
-            spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
-        break;
-
-    case SHM_logdirty:
-        spte = 0;
-        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
-             (_PAGE_PRESENT|_PAGE_ACCESSED) )
-            spte = gpte & ~_PAGE_RW;
-        break;
-
-    case SHM_full_32:
-        spte = 0;
-
-        if ( mmio_space(gpte & 0xFFFFF000) )
-        {
-            *spte_p = spte;
-            return;
-        }
+    if ( shadow_mode_external(d) && mmio_space(gpte & 0xFFFFF000) ) {
+        *spte_p = 0;
+        return;
+    }
 
-        host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
-        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
-
-        if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
-             (_PAGE_PRESENT|_PAGE_ACCESSED) )
-            spte = (host_gpte & _PAGE_DIRTY) ?
-                host_gpte : (host_gpte & ~_PAGE_RW);
-
-        break;
+    spte = 0;
+    if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
+         (_PAGE_PRESENT|_PAGE_ACCESSED) )
     {
+        spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+
+        if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
+            spte &= ~_PAGE_RW;
     }
 
 #if SHADOW_VERBOSE_DEBUG
     if ( old_spte || spte || gpte )
         SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, new spte=0x%p ", gpte, old_spte, spte);
@@ -321,6 +313,8 @@ static inline void l1pte_propagate_from_guest(
     *spte_p = spte;
 }
 
+
+
 static inline void l2pde_general(
     struct domain *d,
     unsigned long *gpde_p,
@@ -342,7 +336,7 @@ static inline void l2pde_general(
     if ( (frame_table[sl1mfn].u.inuse.type_info & PGT_type_mask) ==
          PGT_l2_page_table )
     {
-        if ( shadow_mode(d) != SHM_full_32 )
+        if ( !shadow_mode_translate(d) )
             spde = gpde & ~_PAGE_RW;
     }
 
@@ -474,7 +468,7 @@ static inline unsigned long get_shadow_status(
 {
     unsigned long res;
 
-    ASSERT(shadow_mode(d));
+    ASSERT(shadow_mode_enabled(d));
 
     /*
      * If we get here we know that some sort of update has happened to the
     * underlying page table page: either a PTE has been updated, or the page
     * has changed type. If we're in log dirty mode, we should set the
     * appropriate bit in the dirty bitmap.
     * N.B. The VA update path doesn't use this and is handled independently.
+
+     XXX need to think this through for vmx guests, but probably OK
     */
 
     shadow_lock(d);
 
-    if ( shadow_mode(d) == SHM_logdirty )
+    if ( shadow_mode_log_dirty(d) )
         __mark_dirty(d, gpfn);
 
     if ( !(res = __shadow_status(d, gpfn)) )
@@ -744,7 +740,7 @@ static inline void __update_pagetables(struct exec_domain *ed)
         smfn = shadow_l2_table(d, gpfn);
 #ifdef CONFIG_VMX
     else
-        if (d->arch.shadow_mode == SHM_full_32)
+        if ( shadow_mode_translate(ed->domain) )
         {
             vmx_update_shadow_state(ed, gpfn, smfn);
         }
@@ -752,13 +748,13 @@ static inline void __update_pagetables(struct exec_domain *ed)
     ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
 
-    if (d->arch.shadow_mode != SHM_full_32)
+    if ( !shadow_mode_external(ed->domain) )
         ed->arch.monitor_table = ed->arch.shadow_table;
 }
 
 static inline void update_pagetables(struct exec_domain *ed)
 {
-    if ( unlikely(shadow_mode(ed->domain)) )
+    if ( unlikely(shadow_mode_enabled(ed->domain)) )
     {
         SH_VVLOG("update_pagetables( gptbase=%p, mode=%d )",
                  pagetable_val(ed->arch.guest_table),
diff --git a/xen/include/public/dom0_ops.h b/xen/include/public/dom0_ops.h
index 58b49a7564..6ba044300f 100644
--- a/xen/include/public/dom0_ops.h
+++ b/xen/include/public/dom0_ops.h
@@ -267,7 +267,7 @@ typedef struct {
 #define DOM0_SHADOW_CONTROL_OP_OFF         0
 #define DOM0_SHADOW_CONTROL_OP_ENABLE_TEST 1
 #define DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY 2
-#define DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE 3
+
 #define DOM0_SHADOW_CONTROL_OP_FLUSH       10 /* table ops */
 #define DOM0_SHADOW_CONTROL_OP_CLEAN       11
 #define DOM0_SHADOW_CONTROL_OP_PEEK        12
-- 
2.30.2
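The payoff of the predicate scheme shows up most clearly in the l1pte_propagate_from_guest() rewrite above: the three mode-specific switch arms collapse into a single path, because __gpfn_to_mfn() degenerates to the identity for non-translate domains and the write-protection rule is shared by every mode. A self-contained distillation of the unified path (constants as on x86, p2m lookup stubbed to the identity, mmio check elided):

    #include <assert.h>

    #define PAGE_SHIFT     12
    #define PAGE_MASK      (~((1UL << PAGE_SHIFT) - 1))
    #define _PAGE_PRESENT  0x001UL
    #define _PAGE_RW       0x002UL
    #define _PAGE_ACCESSED 0x020UL
    #define _PAGE_DIRTY    0x040UL

    /* Stand-in for __gpfn_to_mfn(): identity unless translating. */
    static unsigned long gpfn_to_mfn(unsigned long gpfn) { return gpfn; }

    static unsigned long propagate_sketch(unsigned long gpte, int log_dirty)
    {
        unsigned long spte = 0;

        /* Only present+accessed guest PTEs get a shadow entry at all. */
        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
        {
            spte = (gpfn_to_mfn(gpte >> PAGE_SHIFT) << PAGE_SHIFT) |
                   (gpte & ~PAGE_MASK);
            if ( log_dirty || !(gpte & _PAGE_DIRTY) )
                spte &= ~_PAGE_RW;  /* force a fault on the first write */
        }
        return spte;
    }

    int main(void)
    {
        unsigned long g = (0x1234UL << PAGE_SHIFT) | _PAGE_PRESENT |
                          _PAGE_ACCESSED | _PAGE_RW | _PAGE_DIRTY;

        assert(  propagate_sketch(g, 0) & _PAGE_RW );  /* dirty page     */
        assert(!(propagate_sketch(g, 1) & _PAGE_RW));  /* log-dirty mode */
        return 0;
    }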